In [15]:
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
from PIL import Image
import tensorflow_hub as hub
import os
import logging
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
Using TensorFlow backend.
In [2]:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

Load Dataset from Tensorflow

In [5]:
train_dir = "input/train"
test_dir = "input/test"


print("Training set:")
num_glaucoma = len(os.listdir(os.path.join(train_dir, 'Glaucoma')))
num_nonGlaucoma = len(os.listdir(os.path.join(train_dir, 'Non Glaucoma')))
print(f"Glaucoma={num_glaucoma}")
print(f"Non Glaucoma={num_nonGlaucoma}")

print("Testing set:")
print(f"Glaucoma={len(os.listdir(os.path.join(test_dir, 'glaucoma')))}")
print(f"Non Glaucoma={len(os.listdir(os.path.join(test_dir, 'non glaucoma')))}")


glaucoma = os.listdir("input/train/Glaucoma")
glaucoma_dir = "input/train/Glaucoma"
Training set:
Glaucoma=285
Non Glaucoma=511
Testing set:
Glaucoma=88
Non Glaucoma=138

Visualizing the Glaucoma Images

In [9]:
plt.figure(figsize=(20, 10))

for i in range(9):
    plt.subplot(3, 3, i + 1)
    img = plt.imread(os.path.join(glaucoma_dir, glaucoma[i]))
    plt.imshow(img, cmap = plt.cm.binary)
    plt.axis('off')
    
plt.tight_layout()

Visualizing Non Glaucoma Images

In [10]:
normal = os.listdir("input/train/Non Glaucoma")
normal_dir = "input/train/Non Glaucoma"

plt.figure(figsize=(20, 10))

for i in range(9):
    plt.subplot(3, 3, i + 1)
    img = plt.imread(os.path.join(normal_dir, normal[i]))
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    
plt.tight_layout()
In [13]:
glaucoma_img = os.listdir("input/train/Glaucoma")[1]
sample_img = plt.imread(os.path.join(glaucoma_dir, glaucoma_img))
plt.imshow(sample_img, cmap='gray')
plt.colorbar()
plt.title('Raw Glaucoma Image')
print(sample_img.shape)
(1488, 2240, 3)

Creating a Pipeline for the Dataset

In [38]:
datagen = ImageDataGenerator( rotation_range=60,
    width_shift_range=0.2,
    height_shift_range=0.2,
   
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
train_set = datagen.flow_from_directory('input/train/', class_mode='binary', batch_size=64,  target_size=(160, 160))
# load and iterate validation dataset
test_set = datagen.flow_from_directory('input/test/', class_mode='binary', batch_size=1, target_size = (160,160))

validation_set = datagen.flow_from_directory('input/random/', class_mode='binary', batch_size=1, target_size = (160,160))
Found 796 images belonging to 2 classes.
Found 226 images belonging to 2 classes.
Found 8 images belonging to 2 classes.

Using the Model from Tensorflow HUB

In [17]:
URL = "https://tfhub.dev/google/imagenet/mobilenet_v2_035_160/classification/4"
feature_extract = hub.KerasLayer(URL, input_shape =(160,160,3))
feature_extract.trainable = False
    
In [18]:
mymodel = tf.keras.Sequential([feature_extract,
                               tf.keras.layers.Dense(8, activation='softmax')])

mymodel.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
keras_layer (KerasLayer)     (None, 1001)              1692489   
_________________________________________________________________
dense (Dense)                (None, 8)                 8016      
=================================================================
Total params: 1,700,505
Trainable params: 8,016
Non-trainable params: 1,692,489
_________________________________________________________________
In [19]:
mymodel.compile(optimizer='adam',
                loss = 'sparse_categorical_crossentropy',
                metrics = ['accuracy'])
In [35]:
weight_for_0 = num_glaucoma / (num_nonGlaucoma + num_glaucoma)
weight_for_1 = num_nonGlaucoma / (num_nonGlaucoma + num_glaucoma)

class_weight = {0: weight_for_0, 1: weight_for_1}

print(f"Weight for class 0 Normal: {weight_for_0:.2f}")
print(f"Weight for class 1 Non Glaucoma: {weight_for_1:.2f}")
Weight for class 0 Normal: 0.36
Weight for class 1 Pneumonia: 0.64

TRAINING

In [40]:
EPOCHS = 30
#validation loss doesn't reduces for 3 consecutive epoch stop training..
early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 2)
history = mymodel.fit(train_set,
                     epochs = EPOCHS,
                      validation_data = validation_set,
                     class_weight = class_weight,
                     callbacks= [early_stopping]
                     )
Train for 13 steps, validate for 8 steps
Epoch 1/30
13/13 [==============================] - 67s 5s/step - loss: 0.0210 - accuracy: 0.9874 - val_loss: 0.0187 - val_accuracy: 1.0000
Epoch 2/30
13/13 [==============================] - 64s 5s/step - loss: 0.0215 - accuracy: 0.9824 - val_loss: 0.0025 - val_accuracy: 1.0000
Epoch 3/30
13/13 [==============================] - 64s 5s/step - loss: 0.0210 - accuracy: 0.9849 - val_loss: 0.0018 - val_accuracy: 1.0000
Epoch 4/30
13/13 [==============================] - 64s 5s/step - loss: 0.0270 - accuracy: 0.9724 - val_loss: 0.0010 - val_accuracy: 1.0000
Epoch 5/30
13/13 [==============================] - 64s 5s/step - loss: 0.0228 - accuracy: 0.9812 - val_loss: 0.0092 - val_accuracy: 1.0000
Epoch 6/30
13/13 [==============================] - 64s 5s/step - loss: 0.0202 - accuracy: 0.9862 - val_loss: 0.0037 - val_accuracy: 1.0000

Saving the Model

In [41]:
mymodel.save("glaucoma.h5")

Loading the Saved Model

In [42]:
save_model_path = './glaucoma.h5'
# new_model = tf.keras.models.load_model(save_model_path)
# print(reloaded_model.get_config())

# reloaded_model = tf.keras.experimental.load_from_saved_model(save_model_path, custom_objects={'KerasLayer':hub.KerasLayer})
# print(reloaded_model.get_config())
new_model = tf.keras.models.load_model('./glaucoma.h5',custom_objects={'KerasLayer':hub.KerasLayer})
new_model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
keras_layer (KerasLayer)     (None, 1001)              1692489   
_________________________________________________________________
dense (Dense)                (None, 8)                 8016      
=================================================================
Total params: 1,700,505
Trainable params: 8,016
Non-trainable params: 1,692,489
_________________________________________________________________
In [44]:
loss,accuracy = new_model.evaluate(test_set)
226/226 [==============================] - 20s 89ms/step - loss: 3.8401 - accuracy: 0.3894
In [43]:
plt.figure(figsize=(12, 8))

plt.subplot(2, 1, 1)
plt.plot(history.history['loss'], label='Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.legend()
plt.title('Training Loss Vs Validation Loss')

plt.subplot(2, 1, 2)
plt.plot(history.history['accuracy'], label='Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.legend()
plt.title('Training Vs Validation Accuracy')
Out[43]:
Text(0.5, 1.0, 'Training Vs Validation Accuracy')
In [51]:
def process_image(image_numpy):
    image_tf = tf.convert_to_tensor(image_numpy, np.float32)
    resized_image = tf.image.resize(image_tf,(160,160))
    resized_image /=255
    return resized_image.numpy()
In [52]:
def predictor(image_path, model, top_k = 1):
    im = Image.open(image_path)
    test_image = np.asarray(im)
    processed_image = process_image(test_image)
    
    perfect_image = np.expand_dims(processed_image, axis=0)
    
    predicts = model.predict(perfect_image)
    
    classes = []
    
    prob,labels  = tf.math.top_k(predicts, k = top_k, sorted = True)
    
    prob = prob.numpy()
    prob = prob.squeeze()
    probs = list(prob)
    
    label = labels.numpy()
    label = label.squeeze()
    
    classes = list(label)
    
    return probs,classes
In [67]:
image_path = "input/random/glaucoma/glimage51prime.jpg"
im = Image.open(image_path)
orgi_image = np.asarray(im)

prob,labels = predictor(image_path, new_model,2)

class_names=["Glaucoma", "Non Glaucoma"]
topk_classnames = []

limit = len(labels)
for i in range(limit):
    label = labels[i]
    topk_classnames.append(class_names[label])
    

fig, (ax1) = plt.subplots(figsize=(12,10))
ax1.imshow(orgi_image, cmap = plt.cm.binary)
ax1.axis("off")
ax1.set_title(topk_classnames[0])

plt.tight_layout()
In [68]:
!jupyter nbconvert --to html Glaucoma.ipynb
[NbConvertApp] Converting notebook Glaucoma.ipynb to html
[NbConvertApp] Writing 11449928 bytes to Glaucoma.html
In [ ]: